Preprocessing

library(tidyverse)
library(ggdist)
library(ggside)
library(easystats)
library(patchwork)
library(brms)
source("functions.R")

# Load both data-collection waves, stack them, and set up variable types and
# factor level orders used throughout the analysis.
df <- rbind(
  read.csv("../data/study2_part1.csv"),
  read.csv("../data/study2_part2.csv")
) |>
  mutate(
    Date = lubridate::dmy(Date),
    # Order participants chronologically by testing date
    Participant = fct_reorder(Participant, Date),
    Screen_Refresh = as.character(Screen_Refresh),
    Illusion_Side = as.factor(Illusion_Side),
    # Illusion_Effect = fct_relevel(as.factor(Illusion_Effect), "Incongruent", "Null", "Congruent"),
    Illusion_Effect = fct_relevel(as.factor(Illusion_Effect), "Incongruent", "Congruent"),
    Block = as.factor(Block),
    Education = fct_relevel(Education, "High School", "Bachelor", "Master", "Doctorate", "Other", "Prefer not to Say")
  )

# Fix precision: each illusion's Illusion_Difference should take exactly 8
# distinct values, but floating-point noise can inflate that count. Find the
# largest number of significant digits (<= 10) at which exactly 8 unique
# values remain, then round the whole column for that illusion accordingly.
for(ill in unique(df$Illusion_Type)) {
  data <- df[df$Illusion_Type == ill, ]
  i <- 10
  # Guard with i > 0: signif() silently treats digits <= 0 as 1, so without
  # the guard this loop would never terminate if no precision level yields
  # exactly 8 unique values.
  while (i > 0 && length(sort(unique(signif(data$Illusion_Difference, i)))) != 8) {
    i <- i - 1
  }
  if (i == 0) {
    warning(ill, ": could not reduce Illusion_Difference to 8 unique values; column left unchanged.")
  } else {
    df[df$Illusion_Type == ill, "Illusion_Difference"] <- signif(df[df$Illusion_Type == ill, "Illusion_Difference"], i)
    if (i != 10) {
      message(ill, ": Illusion_Difference values != 8. Rounded to ", i, ".")
    }
  }
}

# Transformation: add log-, square-root-, and cube-root-transformed versions
# of the two illusion variables. Illusion_Strength can be negative, so its
# transforms are applied to the absolute value and the sign is restored.
df <- df |>
  mutate(
    Illusion_Difference_log = log(1 + Illusion_Difference),
    Illusion_Difference_sqrt = sqrt(Illusion_Difference),
    Illusion_Difference_cbrt = Illusion_Difference^(1 / 3),
    Illusion_Strength_log = sign(Illusion_Strength) * log(1 + abs(Illusion_Strength)),
    Illusion_Strength_sqrt = sign(Illusion_Strength) * sqrt(abs(Illusion_Strength)),
    Illusion_Strength_cbrt = sign(Illusion_Strength) * abs(Illusion_Strength)^(1 / 3)
  )

Exclusions

# plot(estimate_density(dfraw[dfraw$Participant == "60684f29dbfe1bb2059e5e27_rkqoy", "RT"]))

# Dear participant, thank you for participating in our study. Unfortunately, we didn't receive your data :( did something happen? Some technical issue? We would like to kindly ask you to return your participation so that we can open up more slots. Thank you in advance, and apologies for the inconvenience! 

# Dear participant, thank you for participating in our study. Unfortunately, our system detected multiple issues in your data (such as implausibly short responses, and random-like pattern of answers), which makes it unusable. We understand that you might have been in a hurry or had some other issues; we hope to open up more slots in the future, should you be interested in participating again.

# Dear participant, thank you for participating in our study. Unfortunately, our system detected multiple issues in your data (such as implausibly short responses, and random-like pattern of answers - in particular in the 2nd part of the study), which makes it unusable. We understand that you might have been in a hurry or had some other issues, and so we kindly ask you to return your participation; we hope to open up more slots in the future, should you be interested in participating again.

# Just received the results: in your case, the three most prominent issues were your response pattern that was equivalent to random (in the sequence of keystrokes) that led to a chance-level performance (that was also significantly different from the rest of the population). Moreover, your reaction time distribution was also very different from the norm, with a vast majority of implausibly short responses (i.e., that are faster than the time it takes the brain to process a visual input). This issue was even more prominent in the second block (after the break), which typically happens when participants are in a rush to finish. Finally, your overall completion time was also significantly below the average. Again, we apologize, we understand that your time is valuable, but unfortunately we run too on limited funds and can hardly spare more on unusable data. Sorry for the inconvenience, we will try to open-up more slots soon.


# Participants excluded entirely (random-like response patterns and/or
# implausibly fast RTs); see the per-participant notes below and the
# RT-distribution figure for the supporting evidence.
outliers <- c(
  # Error rate of 48.8% Very short RT
  # Prolific Status: REJECTED (06/08)
  "S46",
  # 2nd block of responses very fast
  # Prolific Status: REJECTED (15/08)
  "S221",
  # Error rate of 44% and very short RTs
  # Prolific Status: RETURN REQUESTED (22/08)
  "S154",
  # 2nd block bad, first block 1/3 bad
  # Prolific Status: RETURN REQUESTED (26/08)
  "S68",
  # Prolific Status: RETURN REQUESTED (26/08)
  "S238",
  # Prolific status: accepted (not enough proof)
  "S201"
)

# Participants kept in the sample, but whose 2nd block is dropped later on
# (see the "Drop also participant with bad second block" step).
partial_outliers <- c(
  # 2nd block a bit bad
  "S22", 
  # Entire 2nd block bad
  "S235",
  # Entire 2nd block bad
  "S107",
  # Half of 2nd block bad
  "S204",
  # 2nd block not good
  "S140")

We removed 6 participants upon inspection of the average error rate (when close to 50%, suggesting random answers) and/or when the reaction time distribution was implausibly fast.

For each block, we computed the error rate and, if more than 50%, we discarded the whole block (as it likely indicates that instructions got mixed up, for instance participants were selecting the smaller instead of the bigger circle).

Summary

# Per-participant summary: error rate, RT mean/SD, and questionnaire SDs.
# Sorted by descending error rate; used below for the descriptive table
# and for ordering the RT-distribution facets.
dfsub <- df |>
  group_by(Participant) |>
  summarize(
    # n = n(),
    Error = sum(Error) / n(),  # proportion of incorrect trials
    RT_Mean = mean(RT),
    RT_SD = sd(RT),
    IPIP6_SD = mean(IPIP6_SD),
    PID5_SD = mean(PID5_SD)  # trailing comma removed (errors on older dplyr)
  ) |>
  ungroup() |>
  arrange(desc(Error))

See the next tab for the descriptive table.

Descriptive Table

# Descriptive table: prepend a "Total" row holding the column means, then
# render with kable. Full outliers are highlighted in red, partial outliers
# in yellow.
data.frame(Participant = c("Total"), t(sapply(dfsub[2:ncol(dfsub)], mean, na.rm=TRUE))) |> 
  rbind(dfsub) |> 
  knitr::kable() |>
  kableExtra::row_spec(1, italic = TRUE) |> 
  # The "+ 1" offsets account for the prepended "Total" row
  kableExtra::row_spec(which(dfsub$Participant %in% outliers) + 1, background = "#EF9A9A") |> 
  kableExtra::row_spec(which(dfsub$Participant %in% partial_outliers) + 1, background = "#FFECB3")
Participant Error RT_Mean RT_SD IPIP6_SD PID5_SD
Total 0.221 808 885 19.47 20.252
S46 0.488 311 337 NA NA
S154 0.441 322 193 17.91 16.310
S68 0.396 302 471 44.24 43.818
S238 0.374 426 190 14.12 18.098
S201 0.361 509 215 17.19 19.592
S221 0.359 417 434 19.93 21.546
S151 0.349 618 227 22.37 25.546
S107 0.348 653 919 16.26 28.366
S22 0.339 796 657 23.26 12.292
S152 0.333 702 376 25.12 31.381
S108 0.325 789 3131 16.72 15.764
S29 0.324 870 547 23.39 15.088
S73 0.324 627 312 25.51 17.170
S99 0.308 616 396 20.00 16.003
S143 0.302 878 950 32.58 46.237
S185 0.300 855 528 24.03 15.303
S219 0.299 674 303 9.79 16.050
S92 0.296 521 269 25.43 36.141
S222 0.294 1092 1806 21.51 20.646
S235 0.289 663 645 25.81 25.937
S158 0.285 1025 625 21.66 23.294
S136 0.285 885 1203 25.30 35.742
S25 0.285 1973 5500 12.74 13.112
S15 0.283 579 178 14.20 17.665
S162 0.283 766 399 22.55 23.732
S70 0.281 837 2198 14.87 11.782
S134 0.281 836 430 10.22 10.288
S156 0.280 897 512 19.71 25.947
S233 0.280 910 3118 21.64 19.944
S123 0.279 1112 1363 20.25 23.229
S62 0.279 635 1192 14.96 20.606
S211 0.277 921 565 NA NA
S28 0.277 719 570 16.66 14.078
S100 0.273 2428 64910 8.80 11.613
S215 0.272 730 269 31.39 23.540
S97 0.271 640 334 14.07 17.858
S122 0.270 744 301 22.17 18.344
S212 0.266 743 396 20.36 18.700
S130 0.264 1192 928 15.70 35.071
S199 0.264 548 287 23.51 27.102
S251 0.262 824 542 18.08 13.528
S59 0.262 534 177 18.70 22.687
S140 0.262 676 440 24.07 19.822
S228 0.261 518 170 11.49 14.032
S27 0.261 869 565 5.57 3.750
S204 0.260 683 495 28.80 33.886
S58 0.259 707 338 14.92 12.773
S209 0.258 610 343 24.12 29.622
S126 0.256 550 152 NA NA
S117 0.255 618 527 20.04 16.819
S96 0.254 788 593 22.87 20.802
S164 0.253 697 192 15.65 11.486
S171 0.251 658 330 13.83 17.351
S76 0.251 1023 1708 8.40 16.452
S33 0.251 742 308 17.10 19.336
S208 0.249 1073 741 23.46 31.817
S12 0.248 627 302 23.10 19.360
S220 0.248 731 336 22.27 34.830
S38 0.248 816 554 14.55 13.533
S224 0.248 632 280 18.79 28.858
S190 0.247 738 235 30.22 23.962
S87 0.247 589 236 22.67 21.682
S110 0.245 613 202 26.64 18.849
S246 0.245 837 559 34.20 32.197
S04 0.243 850 253 18.60 30.581
S43 0.243 956 665 39.67 10.691
S81 0.242 1354 1254 12.23 10.613
S188 0.241 990 513 37.35 28.902
S21 0.241 554 177 30.65 25.546
S119 0.241 957 575 11.22 20.846
S118 0.241 690 503 16.96 25.097
S82 0.240 940 670 16.18 21.861
S95 0.240 646 225 11.55 21.113
S197 0.240 742 290 23.66 21.259
S34 0.238 662 495 NA NA
S147 0.238 624 263 10.31 20.209
S245 0.238 1410 1660 17.48 16.718
S218 0.238 794 628 18.33 13.043
S186 0.238 578 166 27.05 19.746
S231 0.238 661 562 21.06 11.843
S83 0.238 688 600 10.66 11.609
S184 0.237 741 281 22.86 15.848
S11 0.236 674 313 15.44 17.203
S106 0.236 523 156 14.90 14.527
S56 0.235 574 241 13.95 23.418
S26 0.234 643 327 NA NA
S86 0.233 555 180 16.80 13.208
S189 0.233 655 242 18.75 23.706
S153 0.232 694 650 16.84 19.364
S216 0.232 677 381 9.28 12.562
S236 0.232 960 505 35.61 30.486
S217 0.232 1257 1101 22.51 32.215
S77 0.231 698 286 26.22 27.931
S243 0.231 1105 1223 26.47 0.268
S214 0.230 774 414 11.28 12.911
S175 0.230 660 375 14.09 21.910
S239 0.229 495 182 18.67 20.427
S206 0.229 588 180 25.44 28.050
S180 0.228 691 458 24.55 24.626
S159 0.227 676 252 14.28 10.148
S90 0.227 630 297 9.01 14.070
S210 0.226 587 292 20.87 22.090
S179 0.226 1059 2034 24.98 23.949
S114 0.225 718 392 23.58 20.815
S161 0.224 1506 12083 26.40 17.621
S61 0.224 961 541 9.56 6.791
S167 0.223 819 532 25.46 18.754
S234 0.222 584 285 13.57 16.062
S131 0.222 1116 2834 19.78 9.631
S195 0.221 622 751 18.93 16.495
S148 0.221 790 579 17.25 26.715
S248 0.220 735 313 22.95 25.236
S113 0.220 618 189 17.14 19.609
S128 0.220 1327 1557 9.45 14.825
S193 0.220 1050 686 26.51 30.186
S187 0.219 1226 928 26.97 25.541
S194 0.218 630 467 14.06 16.486
S181 0.218 846 529 14.36 12.277
S132 0.218 956 492 20.70 15.249
S79 0.218 994 920 18.06 35.409
S168 0.216 718 346 27.40 36.231
S205 0.216 678 297 18.76 27.289
S64 0.216 541 165 19.70 21.651
S170 0.216 622 191 23.11 29.009
S18 0.216 564 169 30.73 20.369
S244 0.216 1051 1213 17.75 18.528
S54 0.215 610 293 NA NA
S182 0.215 770 663 22.42 20.994
S230 0.215 844 408 15.87 34.161
S45 0.214 1446 1197 13.93 22.342
S32 0.214 757 353 11.82 13.458
S39 0.214 576 204 11.56 18.616
S166 0.213 1359 1203 22.13 30.650
S207 0.213 612 255 8.16 8.932
S09 0.212 1024 766 28.16 21.157
S55 0.212 1012 781 16.75 27.387
S42 0.212 591 244 19.86 24.394
S35 0.211 1220 2635 20.09 17.107
S253 0.210 830 338 32.56 8.925
S20 0.209 633 268 27.45 27.365
S141 0.209 783 341 31.55 30.879
S67 0.209 874 648 17.75 23.190
S165 0.208 670 341 20.86 14.250
S111 0.207 816 521 11.95 15.382
S124 0.207 512 139 11.48 11.988
S49 0.206 911 2764 33.56 27.029
S202 0.206 799 465 21.71 19.134
S103 0.205 564 155 17.76 20.196
S169 0.205 815 1063 24.52 23.650
S242 0.205 558 177 22.12 8.453
S213 0.205 741 331 13.40 19.639
S142 0.204 1030 466 35.66 20.251
S74 0.204 745 359 19.00 11.506
S135 0.204 763 274 15.46 15.635
S01 0.204 1037 576 17.61 12.056
S191 0.203 735 242 25.56 6.786
S109 0.203 602 202 22.41 31.030
S104 0.203 811 330 11.98 24.724
S40 0.203 696 547 33.17 24.215
S98 0.202 965 984 20.97 32.364
S163 0.202 588 245 20.76 18.356
S255 0.202 776 467 30.03 27.748
S203 0.202 639 298 20.92 25.938
S17 0.200 672 247 16.48 20.572
S06 0.198 638 309 20.57 13.137
S03 0.198 623 266 25.08 24.179
S23 0.197 754 388 9.92 13.102
S19 0.197 536 337 19.84 15.503
S80 0.196 884 656 22.10 16.210
S112 0.196 576 250 NA NA
S72 0.195 681 616 12.86 16.185
S94 0.195 649 223 20.92 19.930
S232 0.194 1645 983 NA NA
S89 0.194 972 612 12.46 16.094
S41 0.193 866 508 27.71 24.765
S102 0.192 640 306 13.37 22.426
S116 0.191 862 590 12.08 34.099
S69 0.191 776 480 8.71 4.533
S145 0.191 952 966 17.59 25.104
S227 0.190 739 437 16.14 18.704
S176 0.190 1107 730 13.57 19.912
S174 0.189 613 221 27.84 24.738
S157 0.189 987 1084 16.00 23.375
S36 0.189 711 261 20.65 24.704
S65 0.187 531 107 13.22 10.444
S250 0.187 707 289 13.01 19.225
S254 0.187 625 245 18.77 20.882
S88 0.187 876 514 12.28 13.085
S137 0.187 811 462 22.16 17.634
S237 0.186 939 459 17.60 17.601
S125 0.186 683 424 17.11 24.805
S31 0.185 1062 758 22.10 22.657
S178 0.185 764 360 25.17 16.105
S48 0.185 869 562 24.09 20.615
S249 0.185 940 2942 18.84 31.354
S14 0.185 933 1016 23.74 10.168
S200 0.184 702 184 14.16 15.041
S91 0.184 1072 798 14.89 15.577
S226 0.184 772 305 31.56 28.664
S173 0.184 736 572 29.20 32.209
S129 0.184 1422 1279 16.71 17.390
S105 0.183 886 537 23.91 23.768
S16 0.181 582 189 15.96 21.154
S229 0.181 1513 1338 16.38 27.243
S138 0.181 717 478 27.18 30.724
S198 0.180 896 619 11.43 25.064
S149 0.180 718 372 18.15 27.523
S60 0.179 921 527 20.16 14.074
S85 0.179 775 384 13.89 20.642
S05 0.179 1164 898 15.37 11.234
S66 0.178 741 532 17.33 21.619
S139 0.177 788 432 19.01 20.883
S241 0.177 820 530 14.75 28.453
S252 0.177 1079 1190 15.93 16.035
S50 0.176 866 686 15.94 12.815
S121 0.174 765 302 13.14 18.884
S115 0.173 873 568 15.28 19.651
S155 0.173 692 287 16.89 12.455
S47 0.173 709 294 13.68 11.964
S75 0.173 564 142 22.62 11.717
S240 0.173 706 261 20.54 17.927
S150 0.173 1090 1057 14.99 18.714
S57 0.173 764 454 16.14 17.593
S53 0.173 928 541 10.84 17.773
S08 0.172 524 115 13.83 13.569
S24 0.172 779 352 25.20 29.277
S07 0.171 1742 1499 7.95 11.638
S120 0.171 719 263 27.11 14.849
S10 0.170 851 535 21.55 18.001
S44 0.170 916 684 9.17 10.487
S84 0.165 717 237 5.96 12.652
S78 0.164 783 311 19.53 26.154
S144 0.164 932 748 16.58 26.640
S183 0.163 904 549 12.64 12.233
S225 0.163 900 460 24.99 24.262
S63 0.162 652 279 NA NA
S127 0.162 861 337 16.49 17.123
S172 0.161 805 418 28.19 13.512
S223 0.159 952 530 26.37 17.889
S133 0.159 799 393 24.46 29.976
S256 0.157 786 365 18.68 13.444
S101 0.156 733 311 13.67 16.509
S196 0.155 844 314 11.47 8.379
S51 0.154 820 523 9.39 24.930
S93 0.152 765 406 10.78 23.506
S177 0.151 795 312 28.52 35.588
S192 0.151 668 389 26.37 26.562
S37 0.150 816 379 25.32 19.748
S146 0.150 688 221 24.42 25.383
S71 0.148 849 646 9.73 14.501
S160 0.147 865 720 18.17 17.811
S02 0.146 579 211 12.57 10.432
S247 0.145 816 647 23.25 14.693
S52 0.134 979 641 27.57 37.804
S13 0.133 863 408 13.32 13.272
S30 0.118 1933 1570 23.15 13.202

Reaction Time Distribution

# RT distribution
# One facet per participant: each participant's RT density (normalized within
# participant, split by Block) overlaid on the grand RT density (grey area).
# Outlier participants are drawn in red, partial outliers in orange.
p <- df |> 
  filter(RT < 10000) |> 
  estimate_density(select = "RT", at = c("Participant", "Block")) |>
  group_by(Participant) |>
  normalize(select = "y") |>
  ungroup() |>
  # Order facets by descending error rate (dfsub is sorted that way)
  mutate(Participant = fct_relevel(Participant, as.character(dfsub$Participant))) |> 
  mutate(color = ifelse(Participant %in% outliers, "red", ifelse(Participant %in% partial_outliers, "orange", "blue"))) |>
  ggplot(aes(x = x, y = y)) +
  geom_area(data = normalize(estimate_density(filter(df, RT < 10000), select = "RT"), select = "y"), alpha = 0.2) +
  geom_line(aes(color = color, group = interaction(Participant, Block), linetype = Block)) +
  # 125 ms = lower plausibility cutoff used for trial-level outlier flagging
  geom_vline(xintercept = 125, linetype = "dashed", color = "red") +
  scale_color_manual(values = c("red" = "#F44336", "orange" = "#FF9800", "blue" = "blue"), guide = "none") +
  scale_x_continuous(expand = c(0, 0)) +
  scale_y_continuous(expand = c(0, 0)) +
  coord_cartesian(xlim = c(0, 3000)) +
  theme_modern() +
  theme(axis.text.y = element_blank()) +
  facet_wrap(~Participant) +
  labs(y = "", x = "Reaction Time (ms)")
# p
# ggsave("figures/outliers.png", p, width=25, height=25)

# Exclude the fully-rejected participants from all subsequent analyses
df <- df |>
  filter(!(Participant %in% outliers))

Error Rate per Illusion Block

# Error rate per participant x illusion x block
temp <- df |>
  group_by(Participant, Illusion_Type, Block) |>
  summarize(ErrorRate_per_block = sum(Error) / n()) |>
  ungroup() |>
  arrange(desc(ErrorRate_per_block))

# Count blocks exceeding the 50% error-rate threshold, per illusion and block
temp2 <- temp |>
  filter(ErrorRate_per_block >= 0.5) |>
  group_by(Illusion_Type, Block) |>
  summarize(n = n()) |>
  arrange(desc(n), Illusion_Type) |>
  ungroup() |>
  mutate(
    # NOTE(review): cumsum() makes n_trials cumulative across rows (assumes
    # 56 trials per block), so p_trials is a running proportion of all trials.
    # Confirm this is intended rather than per-row n * 56 / nrow(df).
    n_trials = cumsum(n * 56),
    p_trials = n_trials / nrow(df)
  )

# knitr::kable(temp2)

# Density of per-block error rates, split by illusion type and block
dens_error <- estimate_density(temp, at = c("Illusion_Type", "Block"), method = "KernSmooth")

p1 <- ggplot(dens_error, aes(x = x, y = y)) +
  geom_line(aes(color = Illusion_Type, linetype = Block)) +
  # 50% error rate = block-exclusion threshold
  geom_vline(xintercept = 0.5, linetype = "dashed") +
  scale_x_continuous(expand = c(0, 0)) +
  scale_y_continuous(expand = c(0, 0)) +
  labs(y = "Distribution", x = "Error Rate") +
  theme_modern()

# Percentage of trials removed per illusion type, stacked by block
p2 <- temp2 |>
  mutate(Block = fct_rev(Block)) |>
  ggplot(aes(x = Illusion_Type, y = p_trials)) +
  # geom_col() is equivalent to geom_bar(stat = "identity")
  geom_col(aes(fill = Block)) +
  scale_x_discrete(expand = c(0, 0)) +
  scale_y_continuous(labels = scales::percent, expand = c(0, 0)) +
  labs(y = "Percentage of Trials Removed", x = "Illusion Type") +
  theme_modern() +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))

p1 | p2



# Drop whole blocks with >= 50% errors (likely mixed-up instructions,
# e.g. selecting the smaller instead of the bigger circle)
n <- nrow(df)  # trial count before removal, used for the reported percentage
df <- df |>
  group_by(Participant, Illusion_Type, Block) |>
  mutate(block_error_rate = sum(Error) / n()) |>
  ungroup() |>
  filter(block_error_rate < 0.5) |>
  select(-block_error_rate)

# Drop also the 2nd block of participants flagged as partial outliers.
# Fix: use the bare column `Block` inside filter() instead of `df$Block`;
# reaching back into `df` bypasses data masking and only worked because the
# vector happened to have the same length as the data being filtered.
df <- filter(
  df,
  !(Participant %in% partial_outliers & Block == 2)
)

rm(temp, temp2)

We removed 6888 (2.16%) trials belonging to bad blocks.

Reaction Time per Trial

# Flag trial-level RT outliers among correct responses: implausibly fast
# (< 125 ms) or more than 4 SDs above the mean, with standardize() applied
# within each Participant x Error group. Error trials are never flagged.
# Fix: drop the redundant ifelse(cond, TRUE, FALSE) wrapper - the condition
# is already logical.
df <- df |>
  group_by(Participant, Error) |>
  mutate(Outlier = Error == 0 & (RT < 125 | standardize(RT) > 4)) |>
  ungroup()

# Per-participant RT density overlaid on the grand density (grey area).
# Each participant's upper outlier bound (mean + 4 SD of their correct-trial
# RTs) is visualized by switching the line type beyond that x value.
p1 <- df |> 
  filter(RT < 10000) |> 
  estimate_density(select = "RT", at = "Participant") |>
  group_by(Participant) |>
  normalize(select = "y") |>
  ungroup() |>
  # Attach each participant's upper RT threshold (mean + 4 SD, correct trials)
  merge(df |>
    filter(Error == 0) |>
    group_by(Participant) |>
    summarize(Outlier = mean(RT) + 4 * sd(RT))) |>
  # TRUE for density x values beyond the participant's threshold
  mutate(Outlier = ifelse(x >= Outlier, TRUE, FALSE)) |>
  ggplot(aes(x = x, y = y)) +
  geom_area(data = normalize(estimate_density(filter(df, RT < 10000), select = "RT"), select = "y"), alpha = 0.2) +
  geom_line(aes(color = Participant, linetype = Outlier), alpha=0.2) +
  # Lower plausibility cutoff (125 ms)
  geom_vline(xintercept = c(125), linetype = "dashed", color = "red") +
  scale_color_material_d("rainbow", guide = "none") +
  scale_x_continuous(expand = c(0, 0)) +
  scale_y_continuous(expand = c(0, 0)) +
  guides(linetype = "none") +
  coord_cartesian(xlim = c(0, 4000)) +
  theme_modern() +
  theme(axis.text.y = element_blank()) +
  labs(y = "", x = "Reaction Time (ms)")


# Proportion of outlier trials per participant, sorted ascending
p2 <- df |>
  group_by(Participant) |>
  summarize(Outlier = mean(Outlier)) |>
  mutate(Participant = fct_reorder(Participant, Outlier)) |>
  ggplot(aes(x = Participant, y = Outlier)) +
  # geom_col() is equivalent to geom_bar(stat = "identity")
  geom_col(aes(fill = Participant)) +
  scale_fill_material_d("rainbow", guide = "none") +
  scale_x_discrete(expand = c(0, 0)) +
  scale_y_continuous(expand = c(0, 0), labels = scales::percent) +
  see::theme_modern() +
  theme(axis.text.x = element_blank()) +
  labs(y = "Percentage of outlier trials")

p1 / p2

We removed 2375 (0.76%) outlier trials (correct-response trials with RT < 125 ms or RT more than 4 SD above the participant's mean).

# Keep only non-outlier trials
df <- filter(df, !Outlier)
df$RT <- df$RT / 1000  # Convert to second for better model convergence

Participants

# One row per participant with demographics, timing, and questionnaire scores
dfsub <- df |>
  select(Participant, Age, Sex, Ethnicity, Education, Nationality, Duration, Break_Duration, Device_OS, starts_with("Screen"), starts_with("IPIP6"), starts_with("PID5")) |>
  group_by(Participant) |>
  slice_head(n = 1) |>
  ungroup()

The final sample included 250 participants (Mean age = 26.5, SD = 7.6, range: [18, 69]; Sex: 48.0% females, 52.0% males, 0.0% other; Education: High School, 36.40%; Bachelor, 44.00%; Master, 12.80%; Doctorate, 1.60%; Other, 4.40%; Prefer not to Say, 0.80%).

Results

# Test where ISI needs to be included
# For each illusion, fit fast variational ("meanfield") models of Error and
# RT on ISI to check whether ISI should enter the later (full) models.
rez <- data.frame()
for(ill in c("Delboeuf", 
             "Ebbinghaus", 
             "Rod-Frame",
             "Vertical-Horizontal", 
             "Zöllner",
             "White",
             "Müller-Lyer", 
             "Ponzo", 
             "Poggendorff",
             "Contrast")) {
  print(ill)
  print("===================")
  # Error ~ 2nd-degree polynomial of ISI, logistic mixed model
  m1 <- brms::brm(
    Error ~ poly(ISI, 2) + (poly(ISI, 2) | Participant),
    data = filter(df, Illusion_Type == ill),
    family = "bernoulli",
    algorithm="meanfield"
  )
  # RT (correct trials only) ~ ISI, ex-Gaussian with distributional effects
  # of ISI on sigma and beta
  m2 <- brms::brm(
    brms::bf(
      RT ~ poly(ISI, 2) + (1 | Participant),
      sigma ~ poly(ISI, 2),
      beta ~ poly(ISI, 2)
    ),
    data = filter(df, Illusion_Type == ill, Error == 0),
    family = "exgaussian",
    init=0,
    iter = 4000,
    algorithm="meanfield"
  )
  # NOTE(review): rows 2:3 and 4:9 select the ISI coefficients by position,
  # which is fragile if the formulas change - verify against the posterior
  # parameter order of describe_posterior().
  rez <- rbind(
    bayestestR::describe_posterior(as.data.frame(m1))[2:3, ],
    bayestestR::describe_posterior(as.data.frame(m2))[4:9, ]
  ) |> 
    mutate(Illusion = ill) |> 
    rbind(rez)
}
# For each illusion, fit the full models (MCMC "sampling") over a tensor
# smooth of Illusion_Difference x Illusion_Strength, then save each fitted
# model to models/ under an illusion-specific name.
for(ill in c("Delboeuf", 
             "Ebbinghaus", 
             "Rod-Frame",
             "Vertical-Horizontal", 
             "Zöllner",
             "White",
             "Müller-Lyer", 
             "Ponzo", 
             "Poggendorff",
             "Contrast")) {
  print(ill)
  # Logistic mixed GAM of Error
  model <- brms::brm(
    Error ~ t2(Illusion_Difference, Illusion_Strength, k=8, bs="cr") + (1 | Participant),
    data = filter(df, Illusion_Type == ill),
    family = "bernoulli",
    algorithm="sampling"
  )

  name <- paste0("gam_", tolower(clean_illusionName(ill)), "_err")
  assign(name, model)  # Bind the model to an illusion-specific name (string)
  save(list=name, file=paste0("models/", name, ".Rdata"))
  
  # Ex-Gaussian distributional GAM of RT (correct trials only), with the same
  # tensor smooth on mu, sigma, and beta, plus polynomial ISI effects
  model <- brms::brm(
    brms::bf(
      RT ~ t2(Illusion_Difference, Illusion_Strength, k=8, bs="cr") + poly(ISI, 2) + (1 | Participant),
      sigma ~ t2(Illusion_Difference, Illusion_Strength, k=8, bs="cr") + poly(ISI, 2) + (1 | Participant),
      beta ~ t2(Illusion_Difference, Illusion_Strength, k=8, bs="cr") + poly(ISI, 2) + (1 | Participant)
    ),
    data = filter(df, Illusion_Type == ill, Error == 0),
    family = "exgaussian",
    init=0,
    algorithm="sampling"
  )
  
  name <- paste0("gam_", tolower(clean_illusionName(ill)), "_rt")
  assign(name, model)  # Bind the model to an illusion-specific name (string)
  save(list=name, file=paste0("models/", name, ".Rdata"))
}